# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
#	           ChangeSet	1.676.14.23 -> 1.676.14.24
#	    kernel/softirq.c	1.11 -> 1.12
#	      kernel/timer.c	1.6  -> 1.7
#	include/linux/irq_cpustat.h	1.4  -> 1.5
#
# The following is the BitKeeper ChangeSet Log
# --------------------------------------------
# 02/09/27	bjorn_helgaas@hp.com	1.676.14.24
# irq stat cleanup
# --------------------------------------------
#
diff -Nru a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h
--- a/include/linux/irq_cpustat.h	Wed Oct  8 09:07:23 2003
+++ b/include/linux/irq_cpustat.h	Wed Oct  8 09:07:23 2003
@@ -23,15 +23,31 @@
 #define __IRQ_STAT(cpu, member)	(irq_stat[cpu].member)
 #else
 #define __IRQ_STAT(cpu, member)	((void)(cpu), irq_stat[0].member)
-#endif
+#endif
 
 /* arch independent irq_stat fields */
 #define softirq_pending(cpu)	__IRQ_STAT((cpu), __softirq_pending)
-#define local_irq_count(cpu)	__IRQ_STAT((cpu), __local_irq_count)
-#define local_bh_count(cpu)	__IRQ_STAT((cpu), __local_bh_count)
+#define irq_count(cpu)		__IRQ_STAT((cpu), __local_irq_count)
+#define bh_count(cpu)		__IRQ_STAT((cpu), __local_bh_count)
 #define syscall_count(cpu)	__IRQ_STAT((cpu), __syscall_count)
 #define ksoftirqd_task(cpu)	__IRQ_STAT((cpu), __ksoftirqd_task)
 /* arch dependent irq_stat fields */
 #define nmi_count(cpu)		__IRQ_STAT((cpu), __nmi_count)	/* i386, ia64 */
+
+#define local_hardirq_trylock()	hardirq_trylock(smp_processor_id())
+#define local_hardirq_endlock()	hardirq_endlock(smp_processor_id())
+#define local_irq_enter(irq)	irq_enter(smp_processor_id(), (irq))
+#define local_irq_exit(irq)	irq_exit(smp_processor_id(), (irq))
+#define local_softirq_pending()	softirq_pending(smp_processor_id())
+#define local_ksoftirqd_task()	ksoftirqd_task(smp_processor_id())
+
+/* These will lose the "really_" prefix when the interim macros below are removed. */
+#define really_local_irq_count()	irq_count(smp_processor_id())
+#define really_local_bh_count()	bh_count(smp_processor_id())
+
+/* Interim macros for backward compatibility.  They are deprecated.  Use irq_count() and
+   bh_count() instead.  --davidm 01/11/28 */
+#define local_irq_count(cpu)	irq_count(cpu)
+#define local_bh_count(cpu)	bh_count(cpu)
 
 #endif	/* __irq_cpustat_h */
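#
# For illustration only, not part of the patch: every accessor above,
# including the new local_*() forms, expands to a member of the per-CPU
# irq_stat[] table and is therefore an assignable lvalue -- which is why
# do_softirq() below can write "local_softirq_pending() = 0;".  A minimal
# userspace sketch of that pattern follows; all *_demo names are invented.

#include <stdio.h>

typedef struct {
	unsigned int __softirq_pending;
	unsigned int __local_irq_count;
	unsigned int __local_bh_count;
} irq_cpustat_demo_t;

static irq_cpustat_demo_t irq_stat_demo[2];	/* pretend NR_CPUS == 2 */

static int smp_processor_id_demo(void)
{
	return 0;			/* pretend we always run on CPU 0 */
}

/* Models the SMP variant of __IRQ_STAT(); the UP variant
 * "((void)(cpu), irq_stat[0].member)" uses the comma operator to
 * evaluate cpu for side effects while still yielding the member lvalue. */
#define __IRQ_STAT_DEMO(cpu, member)	(irq_stat_demo[cpu].member)
#define softirq_pending_demo(cpu)	__IRQ_STAT_DEMO((cpu), __softirq_pending)
#define local_softirq_pending_demo()	softirq_pending_demo(smp_processor_id_demo())

int main(void)
{
	local_softirq_pending_demo() |= 1u << 3;	/* mark softirq 3 pending */
	printf("pending: %#x\n", local_softirq_pending_demo());
	local_softirq_pending_demo() = 0;		/* assignment, as in do_softirq() */
	return 0;
}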
diff -Nru a/kernel/softirq.c b/kernel/softirq.c
--- a/kernel/softirq.c	Wed Oct  8 09:07:23 2003
+++ b/kernel/softirq.c	Wed Oct  8 09:07:23 2003
@@ -40,7 +40,10 @@
    - Bottom halves: globally serialized, grr...
 */
 
+/* No separate irq_stat for ia64, it is part of PSA */
+#if !defined(CONFIG_IA64)
 irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
+#endif
 
 static struct softirq_action softirq_vec[32] __cacheline_aligned;
@@ -60,7 +63,6 @@
 
 asmlinkage void do_softirq()
 {
-	int cpu = smp_processor_id();
 	__u32 pending;
 	unsigned long flags;
 	__u32 mask;
@@ -70,7 +72,7 @@
 
 	local_irq_save(flags);
 
-	pending = softirq_pending(cpu);
+	pending = local_softirq_pending();
 
 	if (pending) {
 		struct softirq_action *h;
@@ -79,7 +81,7 @@
 		local_bh_disable();
 restart:
 		/* Reset the pending bitmask before enabling irqs */
-		softirq_pending(cpu) = 0;
+		local_softirq_pending() = 0;
 
 		local_irq_enable();
@@ -94,7 +96,7 @@
 
 		local_irq_disable();
 
-		pending = softirq_pending(cpu);
+		pending = local_softirq_pending();
 		if (pending & mask) {
 			mask &= ~pending;
 			goto restart;
@@ -102,7 +104,7 @@
 		__local_bh_enable();
 
 		if (pending)
-			wakeup_softirqd(cpu);
+			wakeup_softirqd(smp_processor_id());
 	}
 
 	local_irq_restore(flags);
@@ -124,7 +126,7 @@
 	 * Otherwise we wake up ksoftirqd to make sure we
 	 * schedule the softirq soon.
	 */
-	if (!(local_irq_count(cpu) | local_bh_count(cpu)))
+	if (!(irq_count(cpu) | bh_count(cpu)))
 		wakeup_softirqd(cpu);
 }
@@ -287,18 +289,16 @@
 
 static void bh_action(unsigned long nr)
 {
-	int cpu = smp_processor_id();
-
 	if (!spin_trylock(&global_bh_lock))
 		goto resched;
 
-	if (!hardirq_trylock(cpu))
+	if (!local_hardirq_trylock())
 		goto resched_unlock;
 
 	if (bh_base[nr])
 		bh_base[nr]();
 
-	hardirq_endlock(cpu);
+	local_hardirq_endlock();
 	spin_unlock(&global_bh_lock);
 	return;
@@ -377,15 +377,15 @@
 
 	__set_current_state(TASK_INTERRUPTIBLE);
 	mb();
 
-	ksoftirqd_task(cpu) = current;
+	local_ksoftirqd_task() = current;
 
 	for (;;) {
-		if (!softirq_pending(cpu))
+		if (!local_softirq_pending())
 			schedule();
 
 		__set_current_state(TASK_RUNNING);
 
-		while (softirq_pending(cpu)) {
+		while (local_softirq_pending()) {
 			do_softirq();
 			if (current->need_resched)
 				schedule();
diff -Nru a/kernel/timer.c b/kernel/timer.c
--- a/kernel/timer.c	Wed Oct  8 09:07:23 2003
+++ b/kernel/timer.c	Wed Oct  8 09:07:23 2003
@@ -615,7 +615,7 @@
 		else
 			kstat.per_cpu_user[cpu] += user_tick;
 		kstat.per_cpu_system[cpu] += system;
-	} else if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
+	} else if (really_local_bh_count() || really_local_irq_count() > 1)
 		kstat.per_cpu_system[cpu] += system;
 }
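#
# A note on the kernel/timer.c hunk: this accounting runs from the timer
# interrupt, so the local hard-irq count is already at least 1.
# "really_local_irq_count() > 1" therefore means the tick interrupted
# another interrupt, and a nonzero really_local_bh_count() means it
# interrupted bottom-half work; in either case the tick is charged to
# system time.  A minimal userspace sketch of that decision follows; all
# *_demo names are invented and the types are simplified.

#include <stdio.h>

/* Invented stand-ins for really_local_irq_count() / really_local_bh_count().
 * The tick handler itself accounts for hard-irq nesting level 1. */
static unsigned int irq_count_demo = 1;
static unsigned int bh_count_demo = 0;

static unsigned long per_cpu_system_demo;	/* ticks charged to system time */

/* Charge the tick to system time when it interrupted an irq or a bottom
 * half, mirroring the condition in the hunk above. */
static void account_idle_tick_demo(void)
{
	if (bh_count_demo || irq_count_demo > 1)
		per_cpu_system_demo++;
}

int main(void)
{
	account_idle_tick_demo();	/* truly idle: not charged */

	irq_count_demo = 2;		/* tick nested inside another irq */
	account_idle_tick_demo();	/* charged */

	irq_count_demo = 1;
	bh_count_demo = 1;		/* tick interrupted a bottom half */
	account_idle_tick_demo();	/* charged */

	printf("system ticks charged: %lu\n", per_cpu_system_demo);
	return 0;
}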